rc = HYPERVISOR_memory_op(
XENMEM_increase_reservation, &reservation);
if (rc < nr_pages) {
+ int ret;
/* We hit the Xen hard limit: reprobe. */
reservation.extent_start = mfn_list;
reservation.nr_extents = rc;
- BUG_ON(HYPERVISOR_memory_op(
- XENMEM_decrease_reservation,
- &reservation) != rc);
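+ /* Undo the partial allocation before reprobing. */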
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation);
+ BUG_ON(ret != rc);
hard_limit = current_pages + rc - driver_pages;
goto out;
}
xen_machphys_update(mfn_list[i], pfn);
/* Link back into the page tables if not highmem. */
- if (pfn < max_low_pfn)
- BUG_ON(HYPERVISOR_update_va_mapping(
+ if (pfn < max_low_pfn) {
+ int ret;
+ ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
pfn_pte_ma(mfn_list[i], PAGE_KERNEL),
- 0));
+ 0);
+ BUG_ON(ret);
+ }
/* Relinquish the page back to the allocator. */
ClearPageReserved(page);
struct page *page;
void *v;
int need_sleep = 0;
+ int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
if (!PageHighMem(page)) {
v = phys_to_virt(pfn << PAGE_SHIFT);
scrub_pages(v, 1);
- BUG_ON(HYPERVISOR_update_va_mapping(
- (unsigned long)v, __pte_ma(0), 0));
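+ /* Remove the page's kernel mapping before it is returned to Xen. */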
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)v, __pte_ma(0), 0);
+ BUG_ON(ret);
}
#ifdef CONFIG_XEN_SCRUB_PAGES
else {
reservation.extent_start = mfn_list;
reservation.nr_extents = nr_pages;
- BUG_ON(HYPERVISOR_memory_op(
- XENMEM_decrease_reservation, &reservation) != nr_pages);
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation);
+ BUG_ON(ret != nr_pages);
current_pages -= nr_pages;
totalram_pages = current_pages;
pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
unsigned long mfn = pte_mfn(*pte);
+ int ret;
struct xen_memory_reservation reservation = {
.extent_start = &mfn,
.nr_extents = 1,
set_pte_at(&init_mm, addr, pte, __pte_ma(0));
phys_to_machine_mapping[__pa(addr) >> PAGE_SHIFT] =
INVALID_P2M_ENTRY;
- BUG_ON(HYPERVISOR_memory_op(
- XENMEM_decrease_reservation, &reservation) != 1);
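+ /* Hand the machine frame behind this PTE back to Xen. */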
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation);
+ BUG_ON(ret != 1);
return 0;
}
{
unsigned long vstart, flags;
unsigned int order = get_order(nr_pages * PAGE_SIZE);
+ int ret;
vstart = __get_free_pages(GFP_KERNEL, order);
if (vstart == 0)
scrub_pages(vstart, 1 << order);
balloon_lock(flags);
- BUG_ON(generic_page_range(
- &init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL));
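+ /* Clear each PTE in the range and return its frame to Xen. */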
+ ret = generic_page_range(
+ &init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL);
+ BUG_ON(ret);
current_pages -= 1UL << order;
balloon_unlock(flags);
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int i, invcount = 0;
u16 handle;
+ int ret;
for (i = 0; i < nr_pages; i++) {
handle = pending_handle(idx, i);
invcount++;
}
- BUG_ON(HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, unmap, invcount));
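+ /* Unmap every granted page of this request in one batch. */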
+ ret = HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, unmap, invcount);
+ BUG_ON(ret);
}
struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int nbio = 0;
request_queue_t *q;
+ int ret;
/* Check that number of segments is sane. */
nseg = req->nr_segments;
map[i].flags |= GNTMAP_readonly;
}
- BUG_ON(HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref, map, nseg));
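+ /* Map all of the request's segments in a single grant-table call. */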
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
+ BUG_ON(ret);
for (i = 0; i < nseg; i++) {
if (unlikely(map[i].handle < 0)) {
{
int i;
struct page *page;
+ int ret;
blkif_interface_init();
spin_lock_init(&blkio_schedule_list_lock);
INIT_LIST_HEAD(&blkio_schedule_list);
- BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
+ ret = kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES);
+ BUG_ON(ret < 0);
blkif_xenbus_init();
static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
{
struct gnttab_map_grant_ref op;
+ int ret;
op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
op.flags = GNTMAP_host_map;
op.dom = blkif->domid;
lock_vm_area(blkif->blk_ring_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
unlock_vm_area(blkif->blk_ring_area);
+ BUG_ON(ret);
if (op.handle < 0) {
DPRINTK(" Grant table operation failure !\n");
static void unmap_frontend_page(blkif_t *blkif)
{
struct gnttab_unmap_grant_ref op;
+ int ret;
op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
op.handle = blkif->shmem_handle;
op.dev_bus_addr = 0;
lock_vm_area(blkif->blk_ring_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
unlock_vm_area(blkif->blk_ring_area);
+ BUG_ON(ret);
}
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
+ int ret;
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
DPRINTK("Bad return from blkdev data "
"request: %x\n", bret->status);
- BUG_ON(end_that_request_first(
+ ret = end_that_request_first(
req, (bret->status == BLKIF_RSP_OKAY),
- req->hard_nr_sectors));
+ req->hard_nr_sectors);
+ BUG_ON(ret);
end_that_request_last(req);
break;
default:
unsigned int i, op = 0;
struct grant_handle_pair *handle;
unsigned long ptep;
+ int ret;
for ( i = 0; i < nr_pages; i++)
{
BLKTAP_INVALIDATE_HANDLE(handle);
}
- BUG_ON(HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, unmap, op));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, op);
+ BUG_ON(ret);
if (blktap_vma != NULL)
zap_page_range(blktap_vma,
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
int op, ret;
unsigned int nseg;
+ int retval;
/* Check that number of segments is sane. */
nseg = req->nr_segments;
op++;
}
- BUG_ON(HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref, map, op));
+ retval = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
+ BUG_ON(retval);
op = 0;
for (i = 0; i < (req->nr_segments*2); i += 2) {
spin_lock_init(&blkio_schedule_list_lock);
INIT_LIST_HEAD(&blkio_schedule_list);
- BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
+ i = kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES);
+ BUG_ON(i < 0);
blkif_xenbus_init();
static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
{
struct gnttab_map_grant_ref op;
+ int ret;
op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
op.flags = GNTMAP_host_map;
op.dom = blkif->domid;
lock_vm_area(blkif->blk_ring_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
unlock_vm_area(blkif->blk_ring_area);
+ BUG_ON(ret);
if (op.handle < 0) {
DPRINTK(" Grant table operation failure !\n");
static void unmap_frontend_page(blkif_t *blkif)
{
struct gnttab_unmap_grant_ref op;
+ int ret;
op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
op.handle = blkif->shmem_handle;
op.dev_bus_addr = 0;
lock_vm_area(blkif->blk_ring_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
unlock_vm_area(blkif->blk_ring_area);
+ BUG_ON(ret);
}
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
case IOCTL_EVTCHN_UNBIND: {
struct ioctl_evtchn_unbind unbind;
+ int ret;
rc = -EFAULT;
if (copy_from_user(&unbind, (void *)arg, sizeof(unbind)))
op.cmd = EVTCHNOP_close;
op.u.close.port = unbind.port;
- BUG_ON(HYPERVISOR_event_channel_op(&op));
+ ret = HYPERVISOR_event_channel_op(&op);
+ BUG_ON(ret);
rc = 0;
break;
for (i = 0; i < NR_EVENT_CHANNELS; i++)
{
+ int ret;
if (port_user[i] != u)
continue;
op.cmd = EVTCHNOP_close;
op.u.close.port = i;
- BUG_ON(HYPERVISOR_event_channel_op(&op));
+ ret = HYPERVISOR_event_channel_op(&op);
+ BUG_ON(ret);
}
spin_unlock_irq(&port_user_lock);
netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
{
struct gnttab_map_grant_ref op;
+ int ret;
op.host_addr = (unsigned long)netif->comms_area->addr;
op.flags = GNTMAP_host_map;
op.dom = netif->domid;
lock_vm_area(netif->comms_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
unlock_vm_area(netif->comms_area);
+ BUG_ON(ret);
if (op.handle < 0) {
DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
op.dom = netif->domid;
lock_vm_area(netif->comms_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
unlock_vm_area(netif->comms_area);
+ BUG_ON(ret);
if (op.handle < 0) {
DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
static void unmap_frontend_pages(netif_t *netif)
{
struct gnttab_unmap_grant_ref op;
+ int ret;
op.host_addr = (unsigned long)netif->comms_area->addr;
op.handle = netif->tx_shmem_handle;
op.dev_bus_addr = 0;
lock_vm_area(netif->comms_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
unlock_vm_area(netif->comms_area);
+ BUG_ON(ret);
op.host_addr = (unsigned long)netif->comms_area->addr + PAGE_SIZE;
op.handle = netif->rx_shmem_handle;
op.dev_bus_addr = 0;
lock_vm_area(netif->comms_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
unlock_vm_area(netif->comms_area);
+ BUG_ON(ret);
}
int netif_map(netif_t *netif, unsigned long tx_ring_ref,
spin_lock_irqsave(&mfn_lock, flags);
if ( alloc_index != MAX_MFN_ALLOC )
mfn_list[alloc_index++] = mfn;
- else
- BUG_ON(HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &reservation) != 1);
+ else {
+ int ret;
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation);
+ BUG_ON(ret != 1);
+ }
spin_unlock_irqrestore(&mfn_lock, flags);
}
#endif
*/
if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
int hlen = skb->data - skb->head;
+ int ret;
struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
if ( unlikely(nskb == NULL) )
goto drop;
skb_reserve(nskb, hlen);
__skb_put(nskb, skb->len);
- BUG_ON(skb_copy_bits(skb, -hlen, nskb->data - hlen,
- skb->len + hlen));
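+ /* Copy the full header and payload into the replacement skb. */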
+ ret = skb_copy_bits(skb, -hlen, nskb->data - hlen,
+ skb->len + hlen);
+ BUG_ON(ret);
nskb->dev = skb->dev;
nskb->proto_csum_valid = skb->proto_csum_valid;
dev_kfree_skb(skb);
struct sk_buff *skb;
u16 notify_list[NETIF_RX_RING_SIZE];
int notify_nr = 0;
+ int ret;
skb_queue_head_init(&rxq);
mcl++;
mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
- BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
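+ /* Execute the batched operations in a single multicall. */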
+ ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+ BUG_ON(ret != 0);
mcl = rx_mcl;
if( HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
u16 pending_idx;
PEND_RING_IDX dc, dp;
netif_t *netif;
+ int ret;
dc = dealloc_cons;
dp = dealloc_prod;
gop->handle = grant_tx_ref[pending_idx];
gop++;
}
- BUG_ON(HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
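+ /* Unmap all pending grant references in a single batch. */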
+ ret = HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
+ BUG_ON(ret);
while (dealloc_cons != dp) {
pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
NETIF_RING_IDX i;
gnttab_map_grant_ref_t *mop;
unsigned int data_len;
+ int ret;
if (dealloc_cons != dealloc_prod)
net_tx_action_dealloc();
if (mop == tx_map_ops)
return;
- BUG_ON(HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
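+ /* Map the queued transmit buffers in a single grant-table call. */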
+ ret = HYPERVISOR_grant_table_op(
+ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
+ BUG_ON(ret);
mop = tx_map_ops;
while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
static int
map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
{
+ int ret;
struct gnttab_map_grant_ref op = {
.host_addr = (unsigned long)tpmif->tx_area->addr,
.flags = GNTMAP_host_map,
};
lock_vm_area(tpmif->tx_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
unlock_vm_area(tpmif->tx_area);
+ BUG_ON(ret);
if (op.handle < 0) {
DPRINTK(" Grant table operation failure !\n");
unmap_frontend_page(tpmif_t *tpmif)
{
struct gnttab_unmap_grant_ref op;
+ int ret;
op.host_addr = (unsigned long)tpmif->tx_area->addr;
op.handle = tpmif->shmem_handle;
op.dev_bus_addr = 0;
lock_vm_area(tpmif->tx_area);
- BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
unlock_vm_area(tpmif->tx_area);
+ BUG_ON(ret);
}
int
void free_vm_area(struct vm_struct *area)
{
- BUG_ON(remove_vm_area(area->addr) != area);
+ struct vm_struct *ret;
+ ret = remove_vm_area(area->addr);
+ BUG_ON(ret != area);
kfree(area);
}
unsigned long page;
evtchn_op_t op = { 0 };
+ int ret;
/* Allocate page. */
op.u.alloc_unbound.dom = DOMID_SELF;
op.u.alloc_unbound.remote_dom = 0;
- BUG_ON(HYPERVISOR_event_channel_op(&op));
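+ /* Allocate an unbound event channel for the store interface. */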
+ ret = HYPERVISOR_event_channel_op(&op);
+ BUG_ON(ret);
xen_start_info->store_evtchn = op.u.alloc_unbound.port;
/* And finally publish the above info in /proc/xen */